/*
** Audio.C
Copyright (c) 1993 by Thomas E. Janzen
All Rights Reserved
THIS SOFTWARE IS FURNISHED FREE OF CHARGE FOR STUDY AND USE AND MAY
BE COPIED ONLY FOR PERSONAL USE OR COMPLETELY AS OFFERED WITH NO
CHANGES FOR FREE DISTRIBUTION. NO TITLE TO AND OWNERSHIP OF THE
SOFTWARE IS HEREBY TRANSFERRED. THOMAS E. JANZEN ASSUMES NO
RESPONSIBILITY FOR THE USE OR RELIABILITY OF THIS SOFTWARE.
Thomas E. Janzen
208A Olde Derby Road
Norwood, MA 02062-1761
(617)769-7733
tej@world.std.com
** FACILITY:
**
** AlgoRhythms music improviser on Commodore (TM) Amiga (TM)
** compiled with SAS/C Amiga Compiler 6.50
**
** ABSTRACT:
**
** audio.c contains functions for supporting internal Amiga audio.
**
** AUTHORS: Thomas E. Janzen
**
** CREATION DATE: 04-JUL-1993
**
** MODIFICATION HISTORY:
** DATE NAME DESCRIPTION
** 1 JAN 94 T. Janzen New for 3.0.
*/
#define INTUI_V36_NAMES_ONLY
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <limits.h>
#include <exec/types.h>
#include <exec/nodes.h>
#include <exec/lists.h>
#include <exec/ports.h>
#include <exec/libraries.h>
#include <exec/devices.h>
#include <exec/io.h>
#include <intuition/intuition.h>
#include <libraries/gadtools.h>
#include <proto/dos.h>
#include <proto/graphics.h>
#include <proto/exec.h>
#include <proto/mathffp.h>
#include <proto/intuition.h>
#include <proto/gadtools.h>
#include <exec/memory.h>
#include <devices/audio.h>
#include <dos/dosextens.h>
#include <graphics/gfxbase.h>
#include <libraries/dos.h>
#include <libraries/iffparse.h>
#include <proto/iffparse.h>
#include <clib/iffparse_protos.h>
#include <clib/exec_protos.h>
#include <clib/alib_protos.h>
#include <clib/dos_protos.h>
#include <clib/graphics_protos.h>
#include <iff/8svx.h>
#include <iff/8svxapp.h>
#include <exec/errors.h>
#include <Libraries/asl.h> /* asl */
#include <proto/asl.h> /* asl */
#include "Window.h"
#include "AlgoRhythms.h"
#include "audio.h"
#include "Record.h"
#define C_NOTES_PER_OCTAVE (12)
#define C_OCTAVES_QTY (11)
#define C_NOTES_TOTAL (C_NOTES_PER_OCTAVE * C_OCTAVES_QTY)
#define C_LOW_C (16.3516 / 2.0)
#define C_AUDIO_QTY (4)
#define C_INSTR_QTY (17) /* 16 chairs plus the default voice */
#define C_DFLT_AUDIO (C_INSTR_QTY - 1) /* index of the default voice */
#define C_ONESHOT_SAMP (4)
#define C_REPEAT_SAMP (4)
#define C_TYP_OCTAVES (5)
#define MINARGS (2)
#define ID_CHAN MakeID('C', 'H', 'A', 'N')
#define RIGHT (4)
#define LEFT (2)
#define BOTH (RIGHT | LEFT)
#define C_CHUNKS_QTY (8)
#define M_CHAN0_R (1 << 0)
#define M_CHAN1_L (1 << 1)
#define M_CHAN2_L (1 << 2)
#define M_CHAN3_R (1 << 3)
#define C_INST_NAME_LEN (56L)
#define GAD_BUTTON_1 (16L)
#define GAD_BUTTON_2 (17L)
#define GAD_BUTTON_3 (18L)
#define GAD_BUTTON_4 (19L)
#define GAD_BUTTON_5 (20L)
#define GAD_BUTTON_6 (21L)
#define GAD_BUTTON_7 (22L)
#define GAD_BUTTON_8 (23L)
#define GAD_BUTTON_9 (24L)
#define GAD_BUTTON_10 (25L)
#define GAD_BUTTON_11 (26L)
#define GAD_BUTTON_12 (27L)
#define GAD_BUTTON_13 (28L)
#define GAD_BUTTON_14 (29L)
#define GAD_BUTTON_15 (30L)
#define GAD_BUTTON_16 (31L)
#define GAD_LAST (32L)
#define C_API_QUIET (1)
#define C_API_PLAYING (2)
#define C_OWNER_ME (1)
#define C_OWNER_THEM (2)
#define T_DFLT_NAME "DEFAULT SAWTOOTH"
static const int channels_ary[C_AUDIO_QTY]
= {M_CHAN0_R, M_CHAN1_L, M_CHAN2_L, M_CHAN3_R};
static struct Window *orch_wind = NULL;
static struct Gadget *glist = NULL,
*orch_gads[GAD_LAST];
ULONG orch_mask = 0;
#ifndef CLI
static struct Gadget *create_orch_gadgets(UWORD topborder);
#endif
static void calculate_frequency_table(double []);
static void init_audio_instruments(void);
static UBYTE whichannel[]= {(UBYTE) M_CHAN0_R, (UBYTE) M_CHAN1_L,
(UBYTE) M_CHAN2_L, (UBYTE) M_CHAN3_R};
static struct EightSVXInfo svx_info[C_INSTR_QTY];
static double frequency_table[C_NOTES_TOTAL];
static void de_init_audio_instruments(void);
static const unsigned int middle_c_num = 60L,
clock_ntsc = 3579545L,
clock_pal = 3546895L,
step_ideal = 1000L;
static struct MsgPort *audio_msg_port;
static struct Message *AudioMSG;
static struct IOAudio *aud_open;
static BYTE audio_device;
static unsigned int main_clock;
static char orch_str[C_INSTR_QTY][C_INST_NAME_LEN]
= {"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""};
typedef struct audio_port_io AUDIO_PORT_IO_TYPE;
struct audio_port_io
{
struct IOAudio *api_aud_alloc,
*api_aud_os,
*api_aud_rep,
*api_aud_free,
*api_aud_finish; /* could use ADIOF_SYNCCYCLE but it
** wouldn't work well with
** long one-shots */
struct MsgPort *api_port;
int api_status,
api_key; /* allocation key */
};
static AUDIO_PORT_IO_TYPE voices_aud[C_AUDIO_QTY];
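/*
** init_audio:
** Open audio.device with a zero-length ADCMD_ALLOCATE request, allocate
** and initialize the per-voice IOAudio requests (allocate, one-shot,
** repeat, free, finish) and a reply port for each of the four channels,
** select the PAL or NTSC clock for period calculations, build the
** frequency table, synthesize the default instrument, and open
** iffparse.library.  gi_fubar is set on any failure.
*/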
void init_audio(void)
{
auto int ctr;
audio_msg_port = CreateMsgPort();
if (NULL == audio_msg_port)
{
gi_fubar = TRUE;
}
aud_open = (struct IOAudio *)AllocMem(sizeof(struct IOAudio),
MEMF_PUBLIC | MEMF_CLEAR);
if (NULL == aud_open)
{
gi_fubar = TRUE;
}
aud_open->ioa_Request.io_Message.mn_ReplyPort = audio_msg_port;
aud_open->ioa_Request.io_Message.mn_Node.ln_Pri= 0;
aud_open->ioa_Request.io_Command = (UWORD)ADCMD_ALLOCATE;
aud_open->ioa_Request.io_Flags = (UBYTE)ADIOF_NOWAIT;
aud_open->ioa_AllocKey = (WORD) 0;
aud_open->ioa_Data = whichannel;
aud_open->ioa_Length = 0; /* open always succeeds with
** zero data length; no channels
** are allocated.
*/
/* ioa_Length is a ULONG */
audio_device
= OpenDevice(AUDIONAME, 0UL, (struct IORequest *) aud_open, 0L);
if (audio_device != 0)
{
gi_fubar = TRUE;
}
for (ctr = 0; ctr < C_AUDIO_QTY; ctr++)
{
/*
** Allocate public memory for the IOAudio structures.
*/
voices_aud[ctr].api_aud_alloc =
(struct IOAudio *)AllocMem(sizeof(struct IOAudio),
MEMF_PUBLIC /* | MEMF_CLEAR */);
if (NULL == voices_aud[ctr].api_aud_alloc)
{
gi_fubar = 1;
break;
}
voices_aud[ctr].api_aud_os =
(struct IOAudio *)AllocMem(sizeof(struct IOAudio),
MEMF_PUBLIC /* | MEMF_CLEAR */);
if (NULL == voices_aud[ctr].api_aud_os)
{
gi_fubar = 1;
break;
}
voices_aud[ctr].api_aud_rep =
(struct IOAudio *)AllocMem(sizeof(struct IOAudio),
MEMF_PUBLIC /* | MEMF_CLEAR */);
if (NULL == voices_aud[ctr].api_aud_rep)
{
gi_fubar = 1;
break;
}
voices_aud[ctr].api_aud_free =
(struct IOAudio *)AllocMem(sizeof(struct IOAudio),
MEMF_PUBLIC /* | MEMF_CLEAR */);
if (NULL == voices_aud[ctr].api_aud_free)
{
gi_fubar = 1;
break;
}
voices_aud[ctr].api_aud_finish =
(struct IOAudio *)AllocMem(sizeof(struct IOAudio),
MEMF_PUBLIC /* | MEMF_CLEAR */);
if (NULL == voices_aud[ctr].api_aud_finish)
{
gi_fubar = 1;
break;
}
voices_aud[ctr].api_port = CreateMsgPort();
if (NULL == voices_aud[ctr].api_port)
{
gi_fubar = 1;
break;
}
/*
** Initialize the allocate IOAudio struct
*/
voices_aud[ctr].api_aud_alloc->ioa_Request.io_Command
= (UWORD)ADCMD_ALLOCATE;
voices_aud[ctr].api_aud_alloc->ioa_Request.io_Flags
= (UBYTE)ADIOF_NOWAIT;
voices_aud[ctr].api_aud_alloc->ioa_Request.io_Device
= aud_open->ioa_Request.io_Device;
voices_aud[ctr].api_aud_alloc
->ioa_Request.io_Message.mn_ReplyPort
= voices_aud[ctr].api_port;
voices_aud[ctr].api_aud_alloc
->ioa_Request.io_Message.mn_Node.ln_Pri = 50;
voices_aud[ctr].api_aud_alloc->ioa_Data = whichannel;
voices_aud[ctr].api_aud_alloc->ioa_Length = sizeof whichannel;
/*
** Initialize the one-shot IOAudio struct
*/
voices_aud[ctr].api_aud_os->ioa_Request.io_Command
= (UWORD)CMD_WRITE;
voices_aud[ctr].api_aud_os->ioa_Request.io_Flags
= (UBYTE)ADIOF_PERVOL;
voices_aud[ctr].api_aud_os->ioa_Request.io_Device
= aud_open->ioa_Request.io_Device;
voices_aud[ctr].api_aud_os->ioa_AllocKey
= aud_open->ioa_AllocKey;
voices_aud[ctr].api_aud_os
->ioa_Request.io_Message.mn_ReplyPort
= voices_aud[ctr].api_port;
voices_aud[ctr].api_aud_os
->ioa_Request.io_Message.mn_Node.ln_Pri = 50;
voices_aud[ctr].api_aud_os->ioa_Cycles = (UWORD) 1;
/*
** Initialize the repeating sound IOAudio struct
*/
voices_aud[ctr].api_aud_rep->ioa_Request.io_Command
= (UWORD)CMD_WRITE;
voices_aud[ctr].api_aud_rep->ioa_Request.io_Flags
= (UBYTE)ADIOF_PERVOL;
voices_aud[ctr].api_aud_rep->ioa_Request.io_Device
= aud_open->ioa_Request.io_Device;
voices_aud[ctr].api_aud_rep->ioa_AllocKey
= aud_open->ioa_AllocKey;
voices_aud[ctr].api_aud_rep
->ioa_Request.io_Message.mn_ReplyPort
= voices_aud[ctr].api_port;
voices_aud[ctr].api_aud_rep
->ioa_Request.io_Message.mn_Node.ln_Pri = 50;
/*
** Initialize the free IOAudio struct
*/
voices_aud[ctr].api_aud_free->ioa_Request.io_Command
= (UWORD)ADCMD_FREE;
voices_aud[ctr].api_aud_free->ioa_Request.io_Device
= aud_open->ioa_Request.io_Device;
voices_aud[ctr].api_aud_free->ioa_AllocKey
= aud_open->ioa_AllocKey;
voices_aud[ctr].api_aud_free
->ioa_Request.io_Message.mn_ReplyPort
= voices_aud[ctr].api_port;
voices_aud[ctr].api_aud_free
->ioa_Request.io_Message.mn_Node.ln_Pri = 0;
/*
** Initialize the finish IOAudio struct
*/
voices_aud[ctr].api_aud_finish->ioa_Request.io_Command
= (UWORD)ADCMD_FINISH;
voices_aud[ctr].api_aud_finish->ioa_Request.io_Device
= aud_open->ioa_Request.io_Device;
voices_aud[ctr].api_aud_finish->ioa_AllocKey
= aud_open->ioa_AllocKey;
voices_aud[ctr].api_aud_finish
->ioa_Request.io_Message.mn_ReplyPort
= voices_aud[ctr].api_port;
voices_aud[ctr].api_aud_finish
->ioa_Request.io_Message.mn_Node.ln_Pri = 0;
/*
** Initialize the status to quiet.
*/
voices_aud[ctr].api_status = C_API_QUIET;
}
/*
** Whether this is a PAL or NTSC system, use the correct clock rate for
** calculating sample rates
*/
if (GfxBase->DisplayFlags & PAL)
{
main_clock = clock_pal;
}
else
{
main_clock = clock_ntsc;
}
calculate_frequency_table(frequency_table);
init_audio_instruments();
if (NULL == (IFFParseBase = OpenLibrary("iffparse.library", 37L)))
{
gi_fubar = TRUE;
}
return;
}
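/*
** de_init_audio:
** Free the instrument samples, drain the audio reply port, free any
** channels still playing, release the per-voice IOAudio requests and
** message ports, close audio.device, and close iffparse.library.
*/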
void de_init_audio(void)
{
auto int ctr;
auto struct IOAudio *free_temp;
de_init_audio_instruments();
while ((AudioMSG = GetMsg(audio_msg_port)))
{
;
}
for (ctr = 0; ctr < C_AUDIO_QTY; ctr++)
{
if (C_API_PLAYING == voices_aud[ctr].api_status)
{
free_temp = voices_aud[ctr].api_aud_free;
free_temp->ioa_Request.io_Flags = (UBYTE)IOF_QUICK;
/*
** Will NOT reply if QUICK is set.
*/
BeginIO((struct IORequest *) free_temp);
switch (free_temp->ioa_Request.io_Error)
{
case 0:
/* (free_temp->ioa_Request.io_Unit) = units freed */
break; /* success */
case ADIOERR_NOALLOCATION:
break; /* mismatch of channel and allocation key */
default:
break;
}
}
}
if (audio_msg_port != 0)
{
DeleteMsgPort(audio_msg_port);
}
/*
** Free channels before the Close
*/
for (ctr = 0; ctr < C_AUDIO_QTY; ctr++)
{
FreeMem(voices_aud[ctr].api_aud_alloc,
sizeof(struct IOAudio));
voices_aud[ctr].api_aud_alloc = NULL;
FreeMem(voices_aud[ctr].api_aud_os,
sizeof(struct IOAudio));
voices_aud[ctr].api_aud_os = NULL;
FreeMem(voices_aud[ctr].api_aud_rep,
sizeof(struct IOAudio));
voices_aud[ctr].api_aud_rep = NULL;
FreeMem(voices_aud[ctr].api_aud_free,
sizeof(struct IOAudio));
voices_aud[ctr].api_aud_free = NULL;
FreeMem(voices_aud[ctr].api_aud_finish,
sizeof(struct IOAudio));
voices_aud[ctr].api_aud_finish = NULL;
DeleteMsgPort(voices_aud[ctr].api_port);
}
/*
** Close the device
*/
if (0 == audio_device)
{
CloseDevice((struct IORequest *) aud_open);
/*
** The close should set the device to -1;
** io_Unit is the map of channels to free; unit should be 0
*/
}
FreeMem(aud_open, sizeof(struct IOAudio));
aud_open = NULL;
if (IFFParseBase)
{
CloseLibrary(IFFParseBase);
}
return;
}
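/*
** play_audio_note:
** Start or stop one note on an internal audio channel.  A zero dynamic
** means note-off: the channel recorded in the event is finished and
** freed.  Otherwise an idle channel is used, or one is pre-empted
** round-robin via audio_index; the channel is allocated and the
** one-shot and repeat parts of the chair's instrument are written to it
** with the period and volume derived from the event.
*/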
void play_audio_note(NOTE_EVENT_TYPE *play_event)
{
/* Note: the search for the earliest event to pre-empt fails (it stops
** the wrong voice) when the event count wraps around at 2^31.
*/
auto int oct,
pitch,
volume,
audio_chan,
chair,
sample_rate,
total_samples,
repeat_samples,
no_repeat,
chan_ctr;
static double duration,
frequency;
auto struct IOAudio *os_temp = NULL,
*rep_temp = NULL,
*alloc_temp = NULL,
*free_temp = NULL,
*finish_temp = NULL;
static int audio_index = 0; /* must be static to persist! */
if (0 == play_event->nv_i_dynamic)
/* If this is a note turn-off */
{
audio_chan = play_event->nv_i_audio_chan;
/* Turn off the note */
finish_temp = voices_aud[audio_chan].api_aud_finish;
finish_temp->ioa_Request.io_Flags
= (UBYTE)0; /* the device would not reply if IOF_QUICK were set */
finish_temp->ioa_AllocKey = voices_aud[audio_chan].api_key;
/* Tell the voice to finish */
/* IOF_QUICK is clear, so the device replies; wait for the reply below */
BeginIO((struct IORequest *) finish_temp);
while (0 == GetMsg(finish_temp->ioa_Request.io_Message
.mn_ReplyPort)) /*might be hanging here */
{
;
}
switch (finish_temp->ioa_Request.io_Error)
{
case 0:
break; /* success */
case ADIOERR_NOALLOCATION:
play_event->nv_i_audio_chan = C_NO_CHAN;
play_event->nv_i_was_audio = 0;
return;
break; /* mismatch of channel and allocation key */
default:
break;
}
/* Free the channel */
free_temp = voices_aud[audio_chan].api_aud_free;
free_temp->ioa_Request.io_Flags = 0;
/*
** Will NOT reply if QUICK is set.
*/
BeginIO((struct IORequest *) free_temp);
while (0 == GetMsg(free_temp->ioa_Request.io_Message
.mn_ReplyPort)) /*might be hanging here */
{
;
}
switch (free_temp->ioa_Request.io_Error)
{
case 0:
/* free_temp->ioa_Request.io_Unit = units freed */
break; /* success */
case ADIOERR_NOALLOCATION:
break; /* mismatch of channel and allocation key */
default:
break;
}
voices_aud[audio_chan].api_key = 0;
voices_aud[audio_chan].api_status = C_API_QUIET;
play_event->nv_i_audio_chan = C_NO_CHAN;
play_event->nv_i_was_audio = 0;
return;
}
play_event->nv_i_was_audio = 1;
/*
** Try to find an unused voice first.
*/
chan_ctr = 0;
while ((chan_ctr < C_AUDIO_QTY)
&& (voices_aud[chan_ctr].api_status != C_API_QUIET))
{
chan_ctr++;
}
if (chan_ctr >= C_AUDIO_QTY)
{
audio_chan = audio_index;
/* then there was no free channel; all are playing now */
finish_temp = voices_aud[audio_chan].api_aud_finish;
finish_temp->ioa_Request.io_Flags = 0;
/* Tell the voice to finish */
/* IOF_QUICK is clear, so the device replies; wait for the reply below */
BeginIO((struct IORequest *) finish_temp);
while (0 == GetMsg(finish_temp->ioa_Request.io_Message
.mn_ReplyPort)) /*might be hanging here */
{
;
}
switch (finish_temp->ioa_Request.io_Error)
{
case 0:
break; /* success */
case ADIOERR_NOALLOCATION:
break; /* mismatch of channel and allocation key */
default:
break;
}
free_temp = voices_aud[audio_chan].api_aud_free;
free_temp->ioa_Request.io_Flags = (UBYTE)0;
/*
** Will NOT reply if QUICK is set.
*/
BeginIO((struct IORequest *) free_temp);
while (0 == GetMsg(free_temp->ioa_Request.io_Message
.mn_ReplyPort)) /*might be hanging here */
{
;
}
switch (free_temp->ioa_Request.io_Error)
{
case 0:
/* free_temp->ioa_Request.io_Unit = units freed */
break; /* success */
case ADIOERR_NOALLOCATION:
break; /* mismatch of channel and allocation key */
default:
break;
}
}
else /* chan_ctr is an index to a free channel */
{
audio_chan = chan_ctr;
}
/*
** This approach to finding the audio voice must be replaced
** by something that allows 16 chairs to use 4 audio voices.
** It might be useful to know which voice was the last one running.
*/
os_temp = voices_aud[audio_chan].api_aud_os;
rep_temp = voices_aud[audio_chan].api_aud_rep;
alloc_temp = voices_aud[audio_chan].api_aud_alloc;
finish_temp = voices_aud[audio_chan].api_aud_finish;
free_temp = voices_aud[audio_chan].api_aud_free;
/*
** do a checkio here
*/
play_event->nv_i_audio_chan = audio_chan;
chair = play_event->nv_i_channel;
pitch = play_event->nv_i_cur_pitch;
duration = (double)play_event->nv_r_duration.tv_secs
+ ((double)play_event->nv_r_duration.tv_micro
/ 1000000.0);
volume = play_event->nv_i_dynamic >> 1;
if ((pitch > C_NOTES_TOTAL) || (pitch < 0))
{
pitch = middle_c_num;
}
frequency = frequency_table[pitch];
/*
** ADCMD_ALLOCATE
** synchronous if success and no locks to be stolen
** or if it fails with no-wait flag set
** replies if IOF_QUICK is clear
** otherwise is asynch and replies and clear IOF_QUICK
** Since I set NOWAIT, it won't be synchronous, that is it won't
** wait until channels can be stolen.
*/
alloc_temp->ioa_Request.io_Flags = (UBYTE)(IOF_QUICK | ADIOF_NOWAIT);
alloc_temp->ioa_AllocKey = 0;
BeginIO((struct IORequest *) alloc_temp);
switch (alloc_temp->ioa_Request.io_Error)
{
case 0:
break; /* successful */
case ADIOERR_ALLOCFAILED: /* manual said IOERR_ALLOCFAILED,
** but no such code exists */
/* channels would have to have been stolen */
return;
break;
default:
break;
}
switch ((int)(alloc_temp->ioa_Request.io_Unit))
{
case 0:
break; /* successful */
default:
break;
}
voices_aud[audio_chan].api_status = C_API_PLAYING;
finish_temp->ioa_Request.io_Unit =
free_temp->ioa_Request.io_Unit =
os_temp->ioa_Request.io_Unit =
rep_temp->ioa_Request.io_Unit =
alloc_temp->ioa_Request.io_Unit;
voices_aud[audio_chan].api_key =
finish_temp->ioa_AllocKey =
free_temp->ioa_AllocKey =
os_temp->ioa_AllocKey =
rep_temp->ioa_AllocKey =
alloc_temp->ioa_AllocKey;
oct = svx_info[chair].Vhdr.ctOctave;
if (svx_info[chair].spcycs[0] > 0)
{
do
{
oct--;
rep_temp->ioa_Period = os_temp->ioa_Period
= (UWORD) (main_clock / ((int)floor(frequency)
* svx_info[chair].spcycs[oct]));
} while ((os_temp->ioa_Period < 124) && (oct > 0));
/* preferred periods for anti-aliasing 124 to 256 */
}
else
{
rep_temp->ioa_Period = os_temp->ioa_Period
= (UWORD)(main_clock / svx_info[chair].Vhdr.samplesPerSec);
oct = 0;
}
#ifdef MEASURE
if (play_event->nv_i_dynamic != 0)
{
gi_notes_measure++;
}
#endif
sample_rate = main_clock / rep_temp->ioa_Period;
/*
** Force total_samples even
*/
total_samples = (int)((double)sample_rate * duration) & ~1;
/*
** If there is a one-shot part
*/
no_repeat = FALSE;
repeat_samples = total_samples; /* default when there is no one-shot part */
if (svx_info[chair].Vhdr.oneShotHiSamples > 0)
{
os_temp->ioa_Data = (BYTE *)(svx_info[chair].osamps[oct]);
if (total_samples > svx_info[chair].osizes[oct])
{
os_temp->ioa_Length = svx_info[chair].osizes[oct];
repeat_samples = (total_samples - os_temp->ioa_Length) & ~1;
}
else
{
os_temp->ioa_Length
= ((total_samples > 0) ? total_samples : 2);
no_repeat = TRUE;
}
os_temp->ioa_Volume = (UWORD) volume;
os_temp->ioa_Request.io_Flags |= (UBYTE)IOF_QUICK;
BeginIO((struct IORequest *) os_temp);
switch (os_temp->ioa_Request.io_Error)
{
case 0:
; /* successful;
** there will be a reply at the start of the write;
** quick should be clear
*/
if (os_temp->ioa_Request.io_Flags & (UBYTE)IOF_QUICK)
{
/* was successful and will reply later */
while (0 == GetMsg(os_temp->ioa_Request.io_Message
.mn_ReplyPort)) /*might be hanging here */
{
;
}
}
break;
case IOERR_ABORTED:
/* no-op */
/* it was aborted or stolen; there won't be a sound.
** It won't reply because IOF_QUICK was set */
break;
case ADIOERR_NOALLOCATION:
break; /* mismatch of channel and allocation key */
default:
break;
}
}
/*
** If there is a repeat part to the sample
*/
if ((svx_info[chair].Vhdr.repeatHiSamples > 0) && !no_repeat)
{
rep_temp->ioa_Volume = (UWORD) volume;
rep_temp->ioa_Data = (BYTE *)(svx_info[chair].rsamps[oct]);
rep_temp->ioa_Cycles = (UWORD)((int)floor(frequency * duration)
* svx_info[chair].spcycs[oct]
/ svx_info[chair].rsizes[oct]);
if ((rep_temp->ioa_Cycles * svx_info[chair].rsizes[oct])
> repeat_samples)
{
rep_temp->ioa_Cycles = (UWORD)(repeat_samples
/ svx_info[chair].spcycs[oct]);
}
if (0 == rep_temp->ioa_Cycles)
{
rep_temp->ioa_Cycles = (UWORD) 1;
}
rep_temp->ioa_Length = (ULONG) svx_info[chair].rsizes[oct];
rep_temp->ioa_Request.io_Flags |= (UBYTE)IOF_QUICK;
BeginIO((struct IORequest *) rep_temp);
switch (rep_temp->ioa_Request.io_Error)
{
case 0:
; /* successful;
** there will be a reply at the start of the write;
** quick should be clear
*/
if (rep_temp->ioa_Request.io_Flags & (UBYTE)IOF_QUICK)
{
while (0 == GetMsg(rep_temp->ioa_Request.io_Message
.mn_ReplyPort))
{
;
}
; /* was successful and will reply later */
}
break;
case IOERR_ABORTED:
; /* it was aborted or stolen; there won't be a sound.
** It won't reply because IOF_QUICK was set */
break;
case ADIOERR_NOALLOCATION:
break; /* mismatch of channel and allocation key */
default:
break;
}
}
audio_index = (audio_index + 1) % C_AUDIO_QTY;
return;
}
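/*
** calculate_frequency_table:
** Fill the table with equal-tempered frequencies: entry n is
** C_LOW_C * 2^(n / 12), covering C_OCTAVES_QTY octaves of
** C_NOTES_PER_OCTAVE semitones each.
*/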
static void calculate_frequency_table(double *frequency_table)
{
static int pitch,
octave;
for (octave = 0; octave < C_OCTAVES_QTY; octave++)
{
for (pitch = 0; pitch < C_NOTES_PER_OCTAVE; pitch++)
{
frequency_table[(octave * C_NOTES_PER_OCTAVE) + pitch]
= C_LOW_C
* pow2( ((double)(octave * C_NOTES_PER_OCTAVE) + pitch)
/ 12.0);
}
}
return;
}
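/*
** init_audio_instruments:
** Build the default instrument in chip RAM.  For each octave the
** one-shot buffer is filled by summing sine harmonics with 1/n
** amplitudes, the repeat buffer is a copy of it, and every chair is
** then pointed at this default voice.
*/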
static void init_audio_instruments(void)
{
static int octave,
sample,
instrument,
harmonic;
svx_info[C_DFLT_AUDIO].Vhdr.ctOctave = C_TYP_OCTAVES;
svx_info[C_DFLT_AUDIO].Vhdr.samplesPerSec = 0;
svx_info[C_DFLT_AUDIO].Vhdr.oneShotHiSamples = C_ONESHOT_SAMP;
svx_info[C_DFLT_AUDIO].Vhdr.repeatHiSamples = C_REPEAT_SAMP;
svx_info[C_DFLT_AUDIO].Vhdr.samplesPerHiCycle = C_ONESHOT_SAMP;
strncpy(svx_info[C_DFLT_AUDIO].name, "AlgoRhythms Default Tone",
C_INST_NAME_LEN);
svx_info[C_DFLT_AUDIO].name[sizeof svx_info[C_DFLT_AUDIO].name - 1]
= '\0';
svx_info[C_DFLT_AUDIO].Vhdr.volume = 0x0000FFFF;
for (octave = 0; octave < C_TYP_OCTAVES; octave++)
{
svx_info[C_DFLT_AUDIO].osizes[octave]
= C_ONESHOT_SAMP * (int)floor(pow2( (double)octave));
svx_info[C_DFLT_AUDIO].osamps[octave]
= (BYTE *)AllocMem(svx_info[C_DFLT_AUDIO].osizes[octave],
MEMF_CHIP | MEMF_PUBLIC);
if (svx_info[C_DFLT_AUDIO].osamps[octave] != NULL)
{
for ( sample = 0;
sample < svx_info[C_DFLT_AUDIO].osizes[octave];
sample++)
{
svx_info[C_DFLT_AUDIO].osamps[octave][sample] = 0;
for (harmonic = 1;
harmonic <= pow2( (double)octave);
harmonic++)
{
svx_info[C_DFLT_AUDIO].osamps[octave][sample]
+= sin((2.0 * PI) * (double)harmonic
* (double)sample
/ (double)(svx_info[C_DFLT_AUDIO].osizes[octave]))
* 126.0 / harmonic;
}
}
}
else
{
; /* ran out of chip ram */
}
svx_info[C_DFLT_AUDIO].rsizes[octave]
= C_REPEAT_SAMP * (int)floor(pow2( (double)octave));
svx_info[C_DFLT_AUDIO].rsamps[octave]
= (BYTE *)AllocMem(svx_info[C_DFLT_AUDIO].rsizes[octave],
MEMF_CHIP | MEMF_PUBLIC);
if (svx_info[C_DFLT_AUDIO].rsamps[octave] != NULL)
{
memcpy( svx_info[C_DFLT_AUDIO].rsamps[octave],
svx_info[C_DFLT_AUDIO].osamps[octave],
svx_info[C_DFLT_AUDIO].osizes[octave]);
}
else
{
; /* out of chip ram */
}
svx_info[C_DFLT_AUDIO].spcycs[octave]
= C_ONESHOT_SAMP * (int)floor(pow2( (double)octave));
}
for (instrument = 0; instrument < C_DFLT_AUDIO; instrument++)
{
svx_info[instrument] = svx_info[C_DFLT_AUDIO];
}
return;
}
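/*
** de_init_audio_instruments:
** Free the chip-RAM sample buffers of every chair that loaded its own
** instrument, then free the buffers of the default voice itself.
*/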
static void de_init_audio_instruments(void)
{
auto int oct,
chair;
for (chair = 0; chair < C_DFLT_AUDIO; chair++)
{
if (svx_info[chair].rsamps[0] != svx_info[C_DFLT_AUDIO].rsamps[0])
{
for (oct = 0; oct < svx_info[chair].Vhdr.ctOctave; oct++)
{
if (svx_info[chair].osamps[oct] != NULL)
{
FreeMem(svx_info[chair].osamps[oct],
svx_info[chair].osizes[oct]);
svx_info[chair].osamps[oct] = NULL;
}
if (svx_info[chair].rsamps[oct] != NULL)
{
FreeMem(svx_info[chair].rsamps[oct],
svx_info[chair].rsizes[oct]);
svx_info[chair].rsamps[oct] = NULL;
}
}
}
}
/*
** Free up the default voice
*/
for (oct = 0; oct < C_TYP_OCTAVES; oct++)
{
if (svx_info[C_DFLT_AUDIO].osamps[oct] != NULL)
{
FreeMem(svx_info[C_DFLT_AUDIO].osamps[oct],
svx_info[C_DFLT_AUDIO].osizes[oct]);
svx_info[C_DFLT_AUDIO].osamps[oct] = NULL;
}
if (svx_info[C_DFLT_AUDIO].rsamps[oct] != NULL)
{
FreeMem(svx_info[C_DFLT_AUDIO].rsamps[oct],
svx_info[C_DFLT_AUDIO].rsizes[oct]);
svx_info[C_DFLT_AUDIO].rsamps[oct] = NULL;
}
}
return;
}
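/*
** read_8svx:
** Load an IFF 8SVX instrument from path_str into svx_info[chair] using
** iffparse.library.  The VHDR, NAME, AUTH, ATAK, RLSE, ANNO and CHAN
** property chunks are collected, the BODY is read into a temporary
** buffer, any previously loaded sample for the chair is freed, and the
** one-shot and repeat parts of each octave are copied into chip RAM.
** Returns 0 on success, 1 on failure.
*/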
int read_8svx(char *path_str, const int chair)
{
auto unsigned char *body_buffer = NULL;
auto char *anno_buf = NULL;
static int oneshot_index[MAXOCT],
repeat_index[MAXOCT],
true_len,
octave,
*channels,
error;
static LONG chunk_list[C_CHUNKS_QTY * 2]
= {ID_8SVX, ID_VHDR, ID_8SVX, ID_NAME,
ID_8SVX, ID_AUTH, ID_8SVX, ID_ATAK, ID_8SVX, ID_RLSE,
ID_8SVX, ID_ANNO, ID_8SVX, ID_CHAN, ID_8SVX, ID_BODY};
auto struct IFFHandle *iff = NULL;
auto struct ContextNode *body_node;
auto struct StoredProperty *body_stored_property,
*vhdr_stored_property,
*anno_stored_property,
*name_stored_property,
*auth_stored_property,
*atak_stored_property,
*rlse_stored_property,
*chan_stored_property;
auto Voice8Header *voice_header;
if (NULL == (iff = AllocIFF()))
{
/* AllocIFF() failed. */
return 1;
}
if (NULL == (iff->iff_Stream = Open(path_str, MODE_OLDFILE)))
{
FreeIFF(iff);
return 1;
}
InitIFFasDOS(iff);
if ((error = OpenIFF(iff, IFFF_READ)) != 0)
{
Close(iff->iff_Stream);
FreeIFF(iff);
return 1;
}
error = PropChunks(iff, chunk_list, C_CHUNKS_QTY);
error = StopChunk(iff, ID_8SVX, ID_BODY);
error = ParseIFF(iff, IFFPARSE_SCAN);
switch (error)
{
case IFFERR_EOF:
/* File scan complete. */
break;
case 0: /* found the body */
body_node = CurrentChunk(iff);
if (body_node != NULL)
{
body_buffer = (unsigned char *)malloc(body_node->cn_Size);
if (body_buffer != NULL)
{
ReadChunkBytes(iff, body_buffer, body_node->cn_Size);
}
}
break;
default:
/* printf("File scan aborted, error %1d: %s\n",
error, errormsgs[-error - 1]); */
break;
}
name_stored_property = FindProp(iff, ID_8SVX, ID_NAME);
auth_stored_property = FindProp(iff, ID_8SVX, ID_AUTH);
atak_stored_property = FindProp(iff, ID_8SVX, ID_ATAK);
rlse_stored_property = FindProp(iff, ID_8SVX, ID_RLSE);
chan_stored_property = FindProp(iff, ID_8SVX, ID_CHAN);
if (chan_stored_property != NULL)
{
/* printf("chan len: %d\n", chan_stored_property->sp_Size); */
channels = (int *)(chan_stored_property->sp_Data);
switch (*channels)
{
case RIGHT:
/* RIGHT channel */
break;
case LEFT:
/* LEFT channel */
break;
case BOTH:
/* BOTH channels */
break;
default:
break;
}
}
vhdr_stored_property = FindProp(iff, ID_8SVX, ID_VHDR);
if (vhdr_stored_property != NULL)
{
/* printf("vhdr len: %d\n", vhdr_stored_property->sp_Size); */
voice_header = (Voice8Header *)(vhdr_stored_property->sp_Data);
/* printf("\toneShotHiSamples %d\n\trepeatHiSamples %d\n\
\tsamplesPerHiCycle %d\n\tsamplesPerSec %d\n\
\tctOctave %d\n\tsCompression %d\n\
\tvolume X%08X\n", voice_header->oneShotHiSamples,
voice_header->repeatHiSamples,
voice_header->samplesPerHiCycle,
voice_header->samplesPerSec,
voice_header->ctOctave, voice_header->sCompression,
voice_header->volume); */
}
else
{
/* No VHDR chunk: not a usable 8SVX file; clean up and bail out. */
if (body_buffer != NULL)
{
free(body_buffer);
}
CloseIFF(iff);
Close(iff->iff_Stream);
FreeIFF(iff);
return 1;
}
/* OK, this is the thing.
** If the instrument has no one shot,
** then don't try to play the one shot.
** If the instrument has no repeat
** then don't try to play the repeat part.
** If it has neither, don't load the instrument.
*/
anno_stored_property = FindProp(iff, ID_8SVX, ID_ANNO);
if (anno_stored_property != NULL)
{
/* printf("anno len: %d\n", anno_stored_property->sp_Size); */
if (1 == (anno_stored_property->sp_Size % 2))
{
true_len = anno_stored_property->sp_Size + 2;
}
else
{
/*
** need room for null
*/
true_len = anno_stored_property->sp_Size + 1;
}
anno_buf = malloc(true_len);
if (anno_buf != NULL)
{
memcpy( anno_buf, anno_stored_property->sp_Data,
anno_stored_property->sp_Size);
if (anno_stored_property->sp_Size >= 0)
{
anno_buf[anno_stored_property->sp_Size] = '\0';
free(anno_buf);
anno_buf = NULL;
}
else
{
/* annotation size is negative */
}
}
}
body_stored_property = FindProp(iff, ID_8SVX, ID_BODY);
/*
** compare the voice's first octave sample pointer to the default
** voice. If they are the same, it is just a copy of the default
** voice. If not, we must first de-allocate its loaded voice.
*/
if (svx_info[chair].osamps[0] != svx_info[C_DFLT_AUDIO].osamps[0])
{
for (octave = 0; octave < svx_info[chair].Vhdr.ctOctave; octave++)
{
if (svx_info[chair].osamps[octave] != NULL)
{
FreeMem(svx_info[chair].osamps[octave],
svx_info[chair].osizes[octave]);
svx_info[chair].osamps[octave] = NULL;
}
if (svx_info[chair].rsamps[octave] != NULL)
{
FreeMem(svx_info[chair].rsamps[octave],
svx_info[chair].rsizes[octave]);
svx_info[chair].rsamps[octave] = NULL;
}
}
}
memset(&svx_info[chair], '\0', sizeof(struct EightSVXInfo));
svx_info[chair].Vhdr = *voice_header;
for (octave = 0;
(octave < voice_header->ctOctave) && (octave < MAXOCT);
octave++)
{
oneshot_index[octave]
= (voice_header->repeatHiSamples
+ voice_header->oneShotHiSamples)
* ((int)floor(pow2((double)octave)) - 1);
/*
** If there is a one shot part
*/
if (voice_header->oneShotHiSamples > 0)
{
svx_info[chair].osizes[octave]
= voice_header->oneShotHiSamples
* (int)floor(pow2((double)octave));
svx_info[chair].osamps[octave]
= (BYTE *)AllocMem(svx_info[chair].osizes[octave],
MEMF_CHIP | MEMF_PUBLIC);
if (svx_info[chair].osamps[octave] != NULL)
{
memcpy(svx_info[chair].osamps[octave],
&body_buffer[oneshot_index[octave]],
svx_info[chair].osizes[octave]);
}
else
{
; /* no more chip ram */
}
}
/*
** If there is a repeat part
*/
if (voice_header->repeatHiSamples > 0)
{
svx_info[chair].rsizes[octave]
= voice_header->repeatHiSamples
* (int)floor(pow2((double)octave));
repeat_index[octave]
= oneshot_index[octave] + svx_info[chair].osizes[octave];
svx_info[chair].rsamps[octave]
= (BYTE *)AllocMem(svx_info[chair].rsizes[octave],
MEMF_CHIP | MEMF_PUBLIC);
if (svx_info[chair].rsamps[octave] != NULL)
{
memcpy(svx_info[chair].rsamps[octave],
&body_buffer[repeat_index[octave]],
svx_info[chair].rsizes[octave]);
}
else
{
/* out of chip ram */
}
}
svx_info[chair].spcycs[octave]
= voice_header->samplesPerHiCycle
* (int)floor(pow2((double)octave));
}
if (body_buffer != NULL)
{
free(body_buffer);
body_buffer = NULL;
}
if (anno_buf != NULL)
{
free(anno_buf);
anno_buf = NULL;
}
strncpy(orch_str[chair], path_str, C_INST_NAME_LEN);
orch_str[chair][C_INST_NAME_LEN - 1] = '\0';
#ifndef CLI
set_orch_gadgets(chair, path_str);
#endif
CloseIFF(iff);
Close(iff->iff_Stream);
FreeIFF(iff);
return 0;
}
#ifndef CLI
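/*
** open_orch_window:
** Create the orchestra gadgets and open the ORCHESTRA window on the
** main screen, recording its IDCMP signal mask in orch_mask.
*/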
void open_orch_window(void)
{
auto UWORD topborder;
if (orch_wind != NULL)
{
return;
}
topborder = main_screen->WBorTop
+ (main_screen->Font->ta_YSize + 1);
if (NULL == create_orch_gadgets(topborder))
{
;
}
else
{
if (NULL == (orch_wind = OpenWindowTags(NULL,
WA_Title, "ORCHESTRA",
WA_Left, 25L,
WA_Gadgets, glist, WA_AutoAdjust, TRUE,
WA_Width, 600, WA_MinWidth, 600,
WA_InnerHeight, 176, WA_MinHeight, 186,
WA_DragBar, TRUE, WA_DepthGadget, TRUE,
WA_Activate, TRUE,
#if 0
WA_CloseGadget, TRUE,
#endif
WA_SmartRefresh, TRUE,
WA_IDCMP, BUTTONIDCMP /* | IDCMP_CLOSEWINDOW */,
WA_CustomScreen, main_screen, TAG_END)))
{
;
}
else
{
GT_RefreshWindow(orch_wind, NULL);
orch_mask = 1 << orch_wind->UserPort->mp_SigBit;
}
}
return;
}
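/*
** close_orch:
** Remove and free the orchestra gadgets and close the orchestra window.
*/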
void close_orch(void)
{
RemoveGList(orch_wind, glist, GAD_LAST);
CloseWindowSafely(orch_wind);
FreeGadgets(glist);
glist = NULL;
orch_wind = NULL;
return;
}
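/*
** create_orch_gadgets:
** Build the orchestra gadget list: a read-only text gadget showing the
** instrument name and a "Chair n:" button for each of the 16 chairs.
*/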
static struct Gadget *create_orch_gadgets(UWORD topborder)
{
auto struct NewGadget ng;
auto struct Gadget *gad;
auto int chair_ctr;
static char label_str[16][16];
gad = CreateContext(&glist);
ng.ng_TextAttr = &font_choice;
ng.ng_VisualInfo = vi;
/*
** Set up Voice selection
*/
ng.ng_Height = 11;
for (chair_ctr = 0; chair_ctr < GAD_BUTTON_1; chair_ctr++)
{
ng.ng_Flags = NG_HIGHLABEL | PLACETEXT_LEFT;
ng.ng_Width = 400;
ng.ng_LeftEdge = 90;
ng.ng_GadgetText = NULL;
ng.ng_TopEdge = 11 + (11 * chair_ctr);
ng.ng_GadgetID = chair_ctr;
strncpy(orch_str[chair_ctr], T_DFLT_NAME, C_INST_NAME_LEN);
orch_str[chair_ctr][C_INST_NAME_LEN - 1] = '\0';
orch_gads[chair_ctr]
= gad
= CreateGadget(TEXT_KIND, gad, &ng, GTTX_Text,
orch_str[chair_ctr], TAG_END, 0L);
/*
** Set up buttons
*/
ng.ng_Width = 85;
ng.ng_LeftEdge = 1;
sprintf(label_str[chair_ctr], "Chair %2d:", chair_ctr + 1);
ng.ng_GadgetText = label_str[chair_ctr];
ng.ng_Flags = 0L;
ng.ng_GadgetID = GAD_BUTTON_1 + chair_ctr;
orch_gads[chair_ctr + GAD_BUTTON_1]
= gad
= CreateGadget(BUTTON_KIND, gad, &ng, TAG_END, 0L);
}
return gad;
}
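/*
** set_orch_gadgets:
** Display the given instrument name in the text gadget for one chair.
*/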
void set_orch_gadgets(int chair, char *instrument_name)
{
auto char temp_instr_name[C_INST_NAME_LEN];
strncpy(temp_instr_name, instrument_name, C_INST_NAME_LEN);
temp_instr_name[C_INST_NAME_LEN - 1] = '\0';
GT_SetGadgetAttrs(orch_gads[chair], orch_wind, NULL,
GTTX_Text, temp_instr_name, TAG_END, 0L);
return;
}
#endif
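/*
** process_audio_events:
** Handle IDCMP messages from the orchestra window.  A button gadget
** brings up an ASL file requester and loads the chosen IFF/8SVX file
** into that chair with read_8svx; a close event closes the window.
** Returns 1 if an instrument load was attempted, else 0.
*/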
int process_audio_events(void)
{
static char instr_path[64] = "\0",
instr_file[64] = "\0",
instr_dir[64] = "\0",
instr_banner[] = "Load IFF/8SVX file";
auto int result = 0,
asl_result,
sts,
Closing = FALSE;
auto ULONG imsgClass;
auto UWORD imsgCode;
auto struct Gadget *gad;
auto struct IntuiMessage *imsg;
static struct FileRequester *asl_request;
static struct TagItem asl_tags[8] =
{{ASL_Dir, NULL}, {ASL_File, NULL}, {ASL_Hail, NULL},
{ASL_Window, NULL}, {ASL_LeftEdge, 320}, {ASL_TopEdge, 10},
{ASL_Height, 180}, {TAG_DONE, NULL}};
while ((imsg = GT_GetIMsg(orch_wind->UserPort)) != NULL)
{
gad = (struct Gadget *)imsg->IAddress;
imsgClass = imsg->Class;
imsgCode = imsg->Code;
GT_ReplyIMsg(imsg);
switch(imsgClass)
{
case IDCMP_CLOSEWINDOW:
Closing = TRUE;
break;
case IDCMP_GADGETUP:
asl_tags[0].ti_Data = (ULONG)instr_dir;
asl_tags[1].ti_Data = (ULONG)instr_file;
asl_tags[2].ti_Data = (ULONG)instr_banner;
asl_tags[3].ti_Data = (ULONG)orch_wind;
asl_request = AllocAslRequest(ASL_FileRequest, asl_tags);
asl_result = AslRequest(asl_request, NULL);
if (NULL == asl_result)
{
DisplayBeep(NULL);
}
else
{
strcpy(instr_dir, asl_request->rf_Dir);
strcpy(instr_file, asl_request->rf_File);
strcpy(instr_path, asl_request->rf_Dir);
if ((instr_path[strlen(instr_path)- 1] != ':')
&& (strlen(instr_path) != 0))
{
strcat(instr_path, "/");
}
strcat(instr_path, asl_request->rf_File);
sts = read_8svx(instr_path, gad->GadgetID
- GAD_BUTTON_1);
if (sts)
{
DisplayBeep(NULL);
}
}
result = 1;
FreeAslRequest(asl_request);
break;
default:
break;
}
}
if (Closing)
{
#ifndef CLI
close_orch();
#endif
}
return result;
}
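/*
** fprint_orch:
** Write the chair number and instrument path of every chair that is not
** using the default voice to the given file.
*/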
void fprint_orch(FILE * file)
{
auto int ctr;
for (ctr = 0; ctr < (C_INSTR_QTY - 1); ctr++)
{
if (strcmp(orch_str[ctr], T_DFLT_NAME) != 0)
{
fprintf(file, "%2d %s\n", ctr, orch_str[ctr]);
}
}
return;
}